Bash cheat sheet
Navigation & File System
pwd # Print working directory
cd /path/to/dir # Change directory
cd ~ # Home directory
cd - # Previous directory
ls # List files
ls -la # List all (including hidden) with details
ls -lh # Human-readable file sizes
ls -lt # Sort by modification time (newest first)
tree -L 2 # Directory tree, 2 levels deep
File Operations
cp file.txt copy.txt # Copy file
cp -r dir/ dir_copy/ # Copy directory recursively
mv old.txt new.txt # Rename/move file
rm file.txt # Delete file
rm -r dir/ # Delete directory recursively
rm -rf dir/ # Force delete (no prompts)
mkdir mydir # Create directory
mkdir -p a/b/c # Create nested directories
touch file.txt # Create empty file / update timestamp
ln -s target link # Create symbolic link
Viewing & Editing Files
cat file.txt # Print entire file
head -n 20 file.txt # First 20 lines
tail -n 20 file.txt # Last 20 lines
tail -f log.txt # Follow file in real-time (logs)
less file.txt # Scrollable viewer (q to quit)
wc -l file.txt # Count lines
wc -w file.txt # Count words
wc -c file.txt # Count bytes
diff file1 file2 # Compare two files
Searching
# Find files
find . -name "*.py" # Find by name pattern
find . -type f -name "*.log" # Files only
find . -type d -name "src" # Directories only
find . -mtime -7 # Modified in last 7 days
find . -size +100M # Files larger than 100MB
find . -name "*.tmp" -delete # Find and delete
# Search file contents
grep "pattern" file.txt # Search in file
grep -r "pattern" . # Recursive search in directory
grep -i "pattern" file.txt # Case-insensitive
grep -n "pattern" file.txt # Show line numbers
grep -c "pattern" file.txt # Count matches
grep -l "pattern" *.py # List files with matches
grep -v "pattern" file.txt # Invert (lines NOT matching)
grep -E "foo|bar" file.txt # Extended regex (OR)
grep -o "pattern" file.txt # Print only matched parts
Text Processing
# sed — stream editor
sed 's/old/new/' file.txt # Replace first occurrence per line
sed 's/old/new/g' file.txt # Replace all occurrences
sed -i 's/old/new/g' file.txt # In-place edit
sed -n '5,10p' file.txt # Print lines 5–10
sed '/pattern/d' file.txt # Delete lines matching pattern
# awk — column processing
awk '{print $1}' file.txt # Print first column
awk '{print $1, $3}' file.txt # Print 1st and 3rd columns
awk -F',' '{print $2}' data.csv # CSV: print 2nd column
awk '{sum += $1} END {print sum}' # Sum first column
awk 'NR==5' file.txt # Print 5th line
awk 'length > 80' file.txt # Lines longer than 80 chars
# sort & uniq
sort file.txt # Sort lines alphabetically
sort -n file.txt # Sort numerically
sort -r file.txt # Reverse sort
sort -k2 file.txt # Sort by 2nd column
sort file.txt | uniq # Remove adjacent duplicates
sort file.txt | uniq -c # Count occurrences
sort file.txt | uniq -d # Show only duplicates
# cut — extract columns
cut -d',' -f1,3 data.csv # Fields 1 and 3, comma-delimited
cut -c1-10 file.txt # First 10 characters per line
# tr — translate/delete characters
echo "hello" | tr 'a-z' 'A-Z' # Uppercase → HELLO
echo "hello world" | tr -s ' ' # Squeeze repeated spaces
echo "hello123" | tr -d '0-9' # Delete digits → hello
# Other
paste file1 file2 # Merge files side by side
tee output.txt # Write stdin to file AND stdout
xargs # Build commands from stdin
Pipes & Redirection
cmd > file.txt # Redirect stdout (overwrite)
cmd >> file.txt # Redirect stdout (append)
cmd 2> error.txt # Redirect stderr
cmd 2>&1 # Redirect stderr to stdout
cmd > /dev/null 2>&1 # Discard all output
cmd1 | cmd2 # Pipe stdout of cmd1 to stdin of cmd2
cmd1 | tee file | cmd2 # Pipe + save intermediate output
Variables & Strings
name="world" # Set variable (no spaces around =)
echo "Hello $name" # Variable expansion → Hello world
echo "Path is ${HOME}" # Braces for clarity
echo 'No $expansion' # Single quotes = literal
# String operations
str="Hello World"
echo ${#str} # Length → 11
echo ${str:0:5} # Substring → Hello
echo ${str,,} # Lowercase → hello world
echo ${str^^} # Uppercase → HELLO WORLD
echo ${str/World/Bash} # Replace first → Hello Bash
echo ${str//l/L} # Replace all → HeLLo WorLd
# Default values
echo ${var:-default} # Use "default" if var is unset/empty
echo ${var:=default} # Set var to "default" if unset/empty
Arrays
arr=(one two three) # Declare array
echo ${arr[0]} # First element → one
echo ${arr[@]} # All elements → one two three
echo ${#arr[@]} # Length → 3
arr+=(four) # Append
unset 'arr[1]' # Remove element at index 1 (quote the subscript so it can't glob-match a file)
# Loop over array
for item in "${arr[@]}"; do
echo "$item"
done
Conditionals
# if/elif/else
if [ "$x" -eq 5 ]; then
echo "five"
elif [ "$x" -gt 5 ]; then
echo "more"
else
echo "less"
fi
# Numeric comparisons: -eq -ne -lt -le -gt -ge
# String comparisons: = != -z (empty) -n (not empty)
# File tests: -f (file exists) -d (dir exists) -r (readable) -w (writable) -x (executable)
[ -f "file.txt" ] && echo "exists" # Short-circuit AND
[ -z "$var" ] && echo "empty" # True if var is empty
[[ "$str" == *.txt ]] && echo "match" # Pattern matching (double brackets)
[[ "$str" =~ ^[0-9]+$ ]] && echo "num" # Regex matching
Loops
# For loop
for i in 1 2 3 4 5; do echo $i; done
for i in {1..10}; do echo $i; done
for i in {0..100..5}; do echo $i; done # Step by 5
for f in *.py; do echo "$f"; done # Loop over files
# C-style for
for ((i=0; i<10; i++)); do echo $i; done
# While loop
while read -r line; do
echo "$line"
done < file.txt
# Read delimited fields (per-read IFS sets the field separator)
while IFS=, read -r col1 col2 col3; do
echo "$col1 - $col2"
done < data.csv
Functions
greet() {
local name="$1" # $1 = first argument
echo "Hello, $name"
return 0 # 0 = success
}
greet "World" # Call → Hello, World
# All args: $@ (as separate words), $# (count), $? (last exit code)
Process Management
ps aux # List all processes
ps aux | grep python # Find specific process
kill PID # Send SIGTERM
kill -9 PID # Force kill (SIGKILL)
jobs # List background jobs
cmd & # Run in background
fg %1 # Bring job 1 to foreground
nohup cmd & # Run immune to hangup
Networking
curl -s URL # Fetch URL (silent)
curl -o file URL # Download to file
curl -X POST -d '{"key":"val"}' URL # POST JSON
curl -H "Authorization: Bearer $TOKEN" URL # With header
wget URL # Download file
ping -c 4 host # Ping 4 times
ssh user@host # SSH connect
scp file user@host:/path # Copy file to remote
Disk & System
df -h # Disk usage (human-readable)
du -sh * # Size of each item in current dir
du -sh dir/ # Total size of directory
free -h # Memory usage
top # Live process monitor
htop # Better process monitor
uname -a # System info
uptime # System uptime
whoami # Current user
Permissions
chmod 755 script.sh # rwxr-xr-x
chmod +x script.sh # Add execute permission
chmod -R 644 dir/ # Recursive (careful: 644 strips execute from directories, making them untraversable — prefer find dir/ -type f -exec chmod 644 {} +)
chown user:group file # Change owner
# Permission numbers: r=4, w=2, x=1
# 755 = rwx r-x r-x (owner/group/other)
# 644 = rw- r-- r--
Archives & Compression
tar -czf archive.tar.gz dir/ # Create gzipped tar
tar -xzf archive.tar.gz # Extract gzipped tar
tar -tf archive.tar.gz # List contents
zip -r archive.zip dir/ # Create zip
unzip archive.zip # Extract zip
gzip file # Compress (replaces original)
gunzip file.gz # Decompress
Useful One-Liners
# Count files in directory
find . -type f | wc -l
# Find largest files
du -ah . | sort -rh | head -10
# Replace in all files
find . -name "*.py" -exec sed -i 's/old/new/g' {} +
# Monitor log file for keyword
tail -f app.log | grep --line-buffered "ERROR"
# Kill process on port
lsof -ti:8080 | xargs kill
# Quick HTTP server
python3 -m http.server 8000
# Generate random string
openssl rand -hex 16
# CSV column stats
awk -F',' '{sum+=$2; n++} END {print "avg:", sum/n}' data.csv
# Parallel execution with xargs
cat urls.txt | xargs -P 4 -I {} curl -s {}
Quick Reference
| Need | Command |
|------|---------|
| Find a file | `find . -name "pattern"` |
| Search in files | `grep -r "text" .` |
| Replace in file | `sed -i 's/old/new/g' file` |
| Count lines | `wc -l file` |
| Sort + dedupe | `sort file \| uniq` |
| Extract column | `awk '{print $N}' file` |
| Download file | `curl -O url` or `wget url` |
| Watch changes | `watch -n 2 command` |
| Check port | `lsof -i :PORT` or `ss -tlnp` |
| Disk usage | `du -sh *` |
| JSON pretty-print | `python3 -m json.tool file.json` or `jq . file.json` |